Remove VHPT_ADDR by mapping the VHPT into the xen identity-mapping area,
plus some related cleanups.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
#endif
rsm psr.ic // interrupts (psr.i) are already disabled here
movl r25=PAGE_KERNEL
+ movl r26 = IA64_GRANULE_SHIFT << 2
;;
srlz.d
or r23=r25,r20 // construct PA | page properties
- mov r25=IA64_GRANULE_SHIFT<<2
+ ptr.d in0, r26 // to purge dtr[IA64_TR_VHPT]
;;
- mov cr.itir=r25
+ mov cr.itir=r26
mov cr.ifa=in0 // VA of next task...
+ srlz.d
;;
mov r25=IA64_TR_CURRENT_STACK
#ifdef XEN
extern char ia64_ivt;
context_saved(prev);
+ ia64_disable_vhpt_walker();
if (VMX_DOMAIN(current)) {
vmx_do_launch(current);
migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
current->processor);
} else {
ia64_set_iva(&ia64_ivt);
- ia64_disable_vhpt_walker();
load_region_regs(current);
ia64_set_pta(vcpu_pta(current));
vcpu_load_kernel_regs(current);
}
if (VMX_DOMAIN(next))
vmx_load_state(next);
+
+ ia64_disable_vhpt_walker();
/*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
prev = ia64_switch_to(next);
nd = current->domain;
if (!is_idle_domain(nd)) {
- ia64_disable_vhpt_walker();
load_region_regs(current);
ia64_set_pta(vcpu_pta(current));
vcpu_load_kernel_regs(current);
* walker. Then all accesses happen within idle context will
* be handled by TR mapping and identity mapping.
*/
- ia64_disable_vhpt_walker();
__ia64_per_cpu_var(current_psr_i_addr) = NULL;
__ia64_per_cpu_var(current_psr_ic_addr) = NULL;
}
#include <asm/vcpu.h>
/* Defined in xemasm.S */
-extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long p_vhpt);
+extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long va_vhpt);
/* RID virtualization mechanism is really simple: domains have less rid bits
than the host and the host rid space is shared among the domains. (Values
} else if (rreg == 7) {
ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info,
v->arch.privregs, v->domain->arch.shared_info_va,
- vcpu_vhpt_maddr(v));
+ __va_ul(vcpu_vhpt_maddr(v)));
} else {
set_rr(rr,newrrv.rrval);
}
DEFINE_PER_CPU (unsigned long, vhpt_pend);
static void
- __vhpt_flush(unsigned long vhpt_maddr)
+__vhpt_flush(unsigned long vhpt_maddr)
{
struct vhpt_lf_entry *v = (struct vhpt_lf_entry*)__va(vhpt_maddr);
int i;
v->arch.pta.ve = 1; // enable vhpt
v->arch.pta.size = VHPT_SIZE_LOG2;
v->arch.pta.vf = 1; // long format
- //v->arch.pta.base = __va(v->arch.vhpt_maddr) >> 15;
- v->arch.pta.base = VHPT_ADDR >> 15;
+ v->arch.pta.base = __va_ul(v->arch.vhpt_maddr) >> 15;
vhpt_erase(v->arch.vhpt_maddr);
smp_mb(); // per vcpu vhpt may be used by another physical cpu.
while ((long)addr_range > 0) {
/* Get the VHPT entry. */
- unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
+ unsigned int off = ia64_thash(vadr) -
+ __va_ul(vcpu_vhpt_maddr(current));
struct vhpt_lf_entry *v = vhpt_base + off;
v->ti_tag = INVALID_TI_TAG;
addr_range -= PAGE_SIZE;
void domain_flush_tlb_vhpt(struct domain *d)
{
/* Very heavy... */
- if (HAS_PERVCPU_VHPT(d) /* || VMX_DOMAIN(v) */)
+ if (HAS_PERVCPU_VHPT(d) || d->arch.is_vti)
on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
else
on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
// void *shared_info, /* in1 */
// void *shared_arch_info, /* in2 */
// unsigned long shared_info_va, /* in3 */
-// unsigned long p_vhpt) /* in4 */
+// unsigned long va_vhpt) /* in4 */
//Local usage:
// loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
// loc5=pal_vaddr, loc6=xen_paddr, loc7=shared_archinfo_paddr,
+// r16, r19, r20 are used by ia64_switch_mode_{phys, virt}()
GLOBAL_ENTRY(ia64_new_rr7)
// FIXME? not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
// VHPT
#if VHPT_ENABLED
- mov r24=VHPT_SIZE_LOG2<<2
- movl r22=VHPT_ADDR
+#if IA64_GRANULE_SHIFT < VHPT_SIZE_LOG2
+#error "it must be that VHPT_SIZE_LOG2 <= IA64_GRANULE_SHIFT"
+#endif
+ // unless overlaps with KERNEL_TR and IA64_TR_CURRENT_STACK
+ dep r14=0,in4,0,KERNEL_TR_PAGE_SHIFT
+ dep r15=0,in4,0,IA64_GRANULE_SHIFT
+ dep r21=0,r13,0,IA64_GRANULE_SHIFT
+ ;;
+ cmp.eq p7,p0=r17,r14
+ cmp.eq p8,p0=r15,r21
+(p7) br.cond.sptk .vhpt_overlaps
+(p8) br.cond.sptk .vhpt_overlaps
mov r21=IA64_TR_VHPT
+ dep r22=0,r15,60,4 // physical address of
+ // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
+ mov r24=IA64_GRANULE_SHIFT<<2
;;
- ptr.d r22,r24
- or r23=in4,r26 // construct PA | page properties
+ ptr.d r15,r24
+ or r23=r22,r26 // construct PA | page properties
mov cr.itir=r24
- mov cr.ifa=r22
+ mov cr.ifa=r15
+ srlz.d
;;
itr.d dtr[r21]=r23 // wire in new mapping...
+.vhpt_overlaps:
#endif
// Shared info
if (HAS_PERVCPU_VHPT(v->domain))
return v->arch.pta.val;
#endif
- return VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) | VHPT_ENABLED;
+ return __va_ul(__get_cpu_var(vhpt_paddr)) | (1 << 8) |
+ (VHPT_SIZE_LOG2 << 2) | VHPT_ENABLED;
}
#endif /* !__ASSEMBLY */
#define GATE_ADDR KERNEL_START
#define DEFAULT_SHAREDINFO_ADDR 0xf100000000000000
#define PERCPU_ADDR (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
-#define VHPT_ADDR 0xf200000000000000
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
#define VIRT_FRAME_TABLE_ADDR 0xf300000000000000
#define VIRT_FRAME_TABLE_END 0xf400000000000000